This supplementary information presents the following material:
# Notebook setup: render figures inline and auto-reload any edited module
# (autoreload 2 re-imports all modules before each executed cell).
%matplotlib inline
%load_ext autoreload
%autoreload 2
A convenience script, model.py, allows one to run and cache most learning items in this notebook:
# model.py presumably defines the shared globals used below (opts, tag, seed,
# N_cv, DEBUG, fig_width, phi, FORMATS, dpi_export, hl, hs, np, plt, Image)
# — TODO confirm against model.py.
%run model.py
from shl_scripts.shl_experiments import SHL
shl = SHL(**opts)
# Load (or retrieve from cache) the patch matrix; rows = patches.
data = shl.get_data(matname=tag)
shl?
print('# of pixels per patch =', shl.patch_width**2)
print('number of patches, size of patches = ', data.shape)
print('average of patches = ', data.mean(), ' +/- ', data.mean(axis=1).std())
# Root-mean-square energy of each patch (one value per row).
SE = np.sqrt(np.mean(data**2, axis=1))
print('average energy of data = ', SE.mean(), '+/-', SE.std())
# Cache housekeeping: the commented-out lines are kept as handy one-liners
# to inspect or purge cached results / stale lock files.
#!ls -l {shl.cache_dir}/
!ls -l {shl.cache_dir}/{tag}*
#!ls -ltr {shl.cache_dir}/{tag}*lock*
#!rm {shl.cache_dir}/{tag}*lock*
#!rm {shl.cache_dir}/{tag}*
#!rm {shl.cache_dir}/{tag}*HAP_seed*
#!ls -l {shl.cache_dir}/{tag}*
!ls -ltr {shl.cache_dir}/{tag}*lock*
fname = 'figure_map'
# we cross-validate with 10 different learnings
# NOTE(review): the first assignment below is dead — 8 is the fold actually used.
one_cv = 3 # and pick one to display intermediate results
one_cv = 8 # and pick one to display intermediate results
The actual learning is done in a second object (here dico) from which we can access another set of properties and functions (see the shl_learn.py script):
# Learn one dictionary per (cross-validation fold, homeostasis method).
# Results are cached by learn_dico through the 'matname' key.
homeo_methods = ['None', 'OLS', 'HEH']
# NOTE(review): the first list_figures assignment is dead (overwritten just below).
list_figures = ['show_dico', 'time_plot_error', 'time_plot_logL', 'time_plot_MC', 'show_Pcum']
list_figures = []
dico = {}
for i_cv in range(N_cv):
dico[i_cv] = {}
for homeo_method in homeo_methods:
shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
dico[i_cv][homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
# Re-run (from cache) only the selected fold, this time with figures shown.
list_figures = ['show_dico']
for i_cv in [one_cv]:
for homeo_method in homeo_methods:
# hl / hs are presumably separator strings defined in model.py — TODO confirm.
print(hl + hs + homeo_method[:3] + hs + hl)
shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
# The prints below rely on i_cv / homeo_method keeping their last loop values.
print('size of dictionary = (number of filters, size of imagelets) = ', dico[i_cv][homeo_method].dictionary.shape)
print('average of filters = ', dico[i_cv][homeo_method].dictionary.mean(axis=1).mean(),
'+/-', dico[i_cv][homeo_method].dictionary.mean(axis=1).std())
SE = np.sqrt(np.sum(dico[i_cv][homeo_method].dictionary**2, axis=1))
print('average energy of filters = ', SE.mean(), '+/-', SE.std())
plt.show()
pname = '/tmp/panel_A'  # pname = fname + '_A'
from shl_scripts import show_dico
# BUG FIX: 'one_cvi_cv' was an undefined name (a typo merging 'one_cv' and
# 'i_cv') and raised NameError whenever DEBUG was True; use the selected fold.
if DEBUG: show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=(2,5))
dim_graph = (2, 9)  # (rows, columns) of the dictionary mosaic
colors = ['black', 'orange', 'blue']  # one color per homeostasis method
homeo_methods  # notebook echo of the current method list
%run model.py
# Panel A: one dictionary mosaic per homeostasis method, stacked vertically.
subplotpars = dict(left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
fig, axs = plt.subplots(3, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for ax, color, homeo_method in zip(axs.ravel(), colors, homeo_methods):
ax.axis(c=color, lw=2, axisbg='w')
ax.set_facecolor('w')
fig, ax = show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=dim_graph, fig=fig, ax=ax)
# ax.set_ylabel(homeo_method)
ax.text(-10, 29, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
### TODO put the p_min an p_max value in the filter map
if DEBUG: Image(pname +'.png')
if DEBUG: help(fig.subplots_adjust)
if DEBUG: help(plt.subplots)
if DEBUG: help(matplotlib.gridspec.GridSpec)
# Panel B: convergence of variable 'F' over learning, one curve per method
# and per cross-validation fold (the chosen fold is drawn more opaque).
pname = '/tmp/panel_B' #fname + '_B'
Flim1, Flim2 = .475, .626
from shl_scripts import time_plot
variable = 'F'
alpha_0, alpha = .3, .15
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for i_cv in range(N_cv):
for color, homeo_method in zip(colors, homeo_methods):
ax.axis(c='b', lw=2, axisbg='w')
ax.set_facecolor('w')
# Only the first fold gets a legend label (avoids duplicate legend entries).
if i_cv==0:
fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, label=homeo_method, alpha=alpha_0, fig=fig, ax=ax)
else:
fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, alpha=alpha, fig=fig, ax=ax)
# ax.set_ylabel(homeo_method)
#ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color='k', rotation=90)#, backgroundcolor='white'
ax.legend(loc='best')
ax.set_ylim(Flim1, Flim2)
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
# Assemble the two panels into one figure with the tikz cell magic,
# then rasterize the resulting PDF with ImageMagick.
import tikzmagic
%load_ext tikzmagic
#DEBUG = True
if DEBUG: help(tikzmagic)
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_B}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
fname = 'figure_HEH'
First collecting data:
# Re-learn (from cache) the dictionaries for the selected fold, keeping the
# cumulative activation probabilities P_cum used by the HEH method.
list_figures = ['show_Pcum']
dico = {}
for homeo_method in homeo_methods:
print(hl + hs + homeo_method + hs + hl)
shl = SHL(homeo_method=homeo_method, **opts)
#dico[homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_' + str(one_cv))
dico[homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+one_cv))
plt.show()
dico[homeo_method].P_cum.shape
# Panel A: overlay of the learned non-linear P_cum functions per method.
pname = '/tmp/panel_A' #pname = fname + '_A'
from shl_scripts import plot_P_cum
variable = 'F'
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for color, homeo_method in zip(colors, homeo_methods):
ax.axis(c='b', lw=2, axisbg='w')
ax.set_facecolor('w')
fig, ax = plot_P_cum(dico[homeo_method].P_cum, ymin=0.93, ymax=1.001,
title=None, suptitle=None, ylabel='non-linear functions',
verbose=False, n_yticks=21, alpha=.02, c=color, fig=fig, ax=ax)
# Invisible proxy point so each method gets exactly one legend entry.
ax.plot([0], [0], lw=1, color=color, label=homeo_method, alpha=.6)
# ax.set_ylabel(homeo_method)
#ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color='k', rotation=90)#, backgroundcolor='white'
ax.legend(loc='lower right')
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
if DEBUG: help(fig.legend)
# Panel B: final value of 'F' as a function of the learning rates,
# scanned with SHL_set over each homeostasis method.
pname = '/tmp/panel_B' #fname + '_B'
n_jobs = 1
from shl_scripts.shl_experiments import SHL_set
homeo_methods = ['None', 'OLS', 'HEH']
variables = ['eta', 'eta_homeo']
#latex_variables = [r'$\eta$', r'$\eta_\textnormal{homeo}$']
list_figures = []
for homeo_method in homeo_methods:
opts_ = opts.copy()
opts_.update(homeo_method=homeo_method)
experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
experiments.run(variables=variables, n_jobs=n_jobs, verbose=0)
import matplotlib.pyplot as plt
subplotpars = dict(left=0.2, right=.95, bottom=0.05, top=.95, wspace=0.5, hspace=0.6,)
x, y = .05, .8 #-.3
fig, axs = plt.subplots(len(variables), 1, figsize=(fig_width/2, fig_width/(1.3+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
for color, homeo_method in zip(colors, homeo_methods):
opts_ = opts.copy()
opts_.update(homeo_method=homeo_method)
experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0) #, label=homeo_metho
#axs[i_ax].set_xlabel(latex_variables[i_ax]) #variable
#axs[i_ax].text(x, y, variable, transform=axs[i_ax].transAxes)
#axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
axs[i_ax].set_ylim(Flim1, Flim2)
axs[0].xaxis.set_label_coords(0.5,-.325)
#fig.legend(loc='lower right')
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
# Assemble figure_HEH from the two exported panels, then rasterize.
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A.pdf}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.465\linewidth]{/tmp/panel_B.pdf}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
# figure_HAP: same protocol as figure_map but with the extended method list.
fname = 'figure_HAP'
colors = ['orange', 'blue', 'red', 'green']
homeo_methods = ['OLS', 'HEH', 'EMP', 'HAP']
list_figures = []
dico = {}
for i_cv in range(N_cv):
dico[i_cv] = {}
for homeo_method in homeo_methods:
shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
dico[i_cv][homeo_method] = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
list_figures = ['show_dico'] if DEBUG else []
for i_cv in [one_cv]:
for homeo_method in homeo_methods:
print(hl + hs + homeo_method + hs + hl)
shl = SHL(homeo_method=homeo_method, seed=seed+i_cv, **opts)
shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_' + homeo_method + '_seed=' + str(seed+i_cv))
plt.show()
# As above, these prints use the leftover loop variables.
print('size of dictionary = (number of filters, size of imagelets) = ', dico[i_cv][homeo_method].dictionary.shape)
print('average of filters = ', dico[i_cv][homeo_method].dictionary.mean(axis=1).mean(),
'+/-', dico[i_cv][homeo_method].dictionary.mean(axis=1).std())
SE = np.sqrt(np.sum(dico[i_cv][homeo_method].dictionary**2, axis=1))
print('average energy of filters = ', SE.mean(), '+/-', SE.std())
# Panel A: dictionary mosaics for HEH / EMP / HAP (colors[1:] skips the
# first entry so OLS — already shown in the previous figure — is omitted).
pname = '/tmp/panel_A' #pname = fname + '_A'
subplotpars = dict( left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
fig, axs = plt.subplots(3, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for ax, color, homeo_method in zip(axs.ravel(), colors[1:], homeo_methods[1:]):
ax.axis(c=color, lw=2, axisbg='w')
ax.set_facecolor('w')
from shl_scripts import show_dico
fig, ax = show_dico(shl, dico[one_cv][homeo_method], data=data, dim_graph=dim_graph, fig=fig, ax=ax)
# ax.set_ylabel(homeo_method)
ax.text(-10, 29, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
# Panel B: convergence curves for all four methods.
pname = '/tmp/panel_B' #fname + '_B'
from shl_scripts import time_plot
variable = 'F'
alpha = .3
# NOTE(review): alpha_0 is reused from an earlier cell here — TODO confirm.
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
fig, ax = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
for i_cv in range(N_cv):
for color, homeo_method in zip(colors, homeo_methods):
ax.axis(c='b', lw=2, axisbg='w')
ax.set_facecolor('w')
if i_cv==0:
fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, label=homeo_method, alpha=alpha_0, fig=fig, ax=ax)
else:
fig, ax = time_plot(shl, dico[i_cv][homeo_method], variable=variable, unit='bits', color=color, alpha=alpha, fig=fig, ax=ax)
ax.legend(loc='best')
ax.set_ylim(Flim1, Flim2)
for ext in FORMATS: fig.savefig(pname + ext, dpi=dpi_export, bbox_inches='tight')
# Preview the exported panel when debugging (copy-pasted duplicate line removed).
if DEBUG: Image(pname +'.png')
# Assemble figure_HAP from the two panels, then rasterize with ImageMagick.
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_A}};
\draw [anchor=north west] (.5\linewidth, .382\linewidth) node {\includegraphics[width=.5\linewidth]{/tmp/panel_B}};
\begin{scope}[font=\bf\sffamily\large]
\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
As a control, we compare the methods for different parameters:
# Run (or fetch from cache) the parameter scans for every method.
list_figures = []
for homeo_method in homeo_methods:
opts_ = opts.copy()
opts_.update(homeo_method=homeo_method)
experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
experiments.run(variables=variables, n_jobs=n_jobs, verbose=0)
import matplotlib.pyplot as plt
# Control figure: final 'F' versus each scanned parameter, one sub-panel per
# parameter, one colored curve per homeostasis method, enlarged by factor UP.
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95, wspace=0.5, hspace=0.35,)
x, y = .05, .8 #-.3
UP = 3  # magnification of the figure size
fig, axs = plt.subplots(len(variables), 1, figsize=(UP*fig_width/2, UP*fig_width/(1+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
    for color, homeo_method in zip(colors, homeo_methods):
        opts_ = opts.copy()
        opts_.update(homeo_method=homeo_method)
        experiments = SHL_set(opts_, tag=tag + '_' + homeo_method, base=10)
        fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0, label=homeo_method)
    axs[i_ax].set_xlabel('')  # variable name is drawn as text instead
    axs[i_ax].text(x, y, variable, transform=axs[i_ax].transAxes)
    #axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
    # BUG FIX: this used 'ax.set_ylim(...)', a stale axis left over from a
    # previous cell; the limits must apply to the current sub-panel, exactly
    # as in the matching scan-plot cell above.
    axs[i_ax].set_ylim(Flim1, Flim2)
fig.legend(loc='lower right')
fname = 'figure_CNN'
# Stage the face database into /tmp (rsync from a local, machine-specific path).
!rm -fr /tmp/database/Face_DataBase
!mkdir -p /tmp/database && rsync -a "/Users/laurentperrinet/science/VB_These/Rapport d'avancement/database/Face_DataBase" /tmp/database/
#!mkdir -p /tmp/database/ && rsync -a "/Users/laurentperrinet/science/VB_These/Rapport d'avancement/database/Face_DataBase/Raw_DataBase/*" /tmp/database/Face_DataBase
from CHAMP.DataLoader import LoadData
from CHAMP.DataTools import LocalContrastNormalization, FilterInputData, GenerateMask
from CHAMP.Monitor import DisplayDico, DisplayConvergenceCHAMP, DisplayWhere
import os
datapath = os.path.join("/tmp", "database")
path = os.path.join(datapath, "Face_DataBase/Raw_DataBase")
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
# MP Parameters
nb_dico = 20
width = 9
dico_size = (width, width)
l0 = 20
seed = 42
# Learning Parameters
eta = .05
nb_epoch = 500
# Preprocess the training set: contrast-normalize then band-pass filter.
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
N_TrSet, _, _, _ = LocalContrastNormalization(TrSet)
Filtered_L_TrSet = FilterInputData(
N_TrSet, sigma=0.25, style='Custom', start_R=15)
# Gaussian apodization mask applied to each dictionary atom.
mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
from CHAMP.CHAMP_Layer import CHAMP_Layer
from CHAMP.DataTools import SaveNetwork, LoadNetwork
homeo_methods = ['None', 'HAP']
# Train (or load from cache) one low-level CHAMP layer per homeostasis method;
# eta_homeo=0. disables homeostasis for the 'None' condition.
for homeo_method, eta_homeo in zip(homeo_methods, [0., 0.0025]):
    ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
    try:
        L1_mask = LoadNetwork(loading_path=ffname)
    # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit are not
    # swallowed; any load failure (missing or corrupt cache) triggers retraining.
    except Exception:
        L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                              dico_size=dico_size, mask=mask, verbose=1)
        dico_mask = L1_mask.TrainLayer(
            Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
        SaveNetwork(Network=L1_mask, saving_path=ffname)
# Panel A: the learned CHAMP dictionaries, one exported image per method.
pname = '/tmp/panel_A' #pname = fname + '_A'
subplotpars = dict(left=0.042, right=1., bottom=0., top=1., wspace=0.05, hspace=0.05,)
for color, homeo_method in zip(['black', 'green'], homeo_methods):
#fig, axs = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
L1_mask = LoadNetwork(loading_path=ffname)
fig, ax = DisplayDico(L1_mask.dictionary)
# ax.set_ylabel(homeo_method)
#for ax in list(axs):
# ax.axis(c=color, lw=2, axisbg='w')
# ax.set_facecolor('w')
ax[0].text(-5, 6, homeo_method, fontsize=8, color=color, rotation=90)#, backgroundcolor='white'
plt.tight_layout( pad=0., w_pad=0., h_pad=.0)
for ext in FORMATS: fig.savefig(pname + '_' + homeo_method + ext, dpi=dpi_export, bbox_inches='tight')
# Panel B: activation histograms showing how evenly the atoms are used.
pname = '/tmp/panel_B' #fname + '_B'
from shl_scripts import time_plot
variable = 'F'
alpha = .3
subplotpars = dict(left=0.2, right=.95, bottom=0.2, top=.95)#, wspace=0.05, hspace=0.05,)
for color, homeo_method in zip(['black', 'green'], homeo_methods):
#fig, axs = plt.subplots(1, 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars)
ffname = 'cache_dir_CNN/CHAMP_low_' + homeo_method + '.pkl'
L1_mask = LoadNetwork(loading_path=ffname)
fig, ax = DisplayConvergenceCHAMP(L1_mask, to_display=['histo'], color=color)
ax.axis(c=color, lw=2, axisbg='w')
ax.set_facecolor('w')
ax.set_ylabel('counts')
ax.set_xlabel('feature #')
ax.set_ylim(0, 560)
#ax.text(-8, 7*dim_graph[0], homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
#ax[0].text(-8, 3, homeo_method, fontsize=12, color=color, rotation=90)#, backgroundcolor='white'
fig.suptitle(f'method={homeo_method}', y=1.15, fontsize=12)
for ext in FORMATS: fig.savefig(pname + '_' + homeo_method + ext, dpi=dpi_export, bbox_inches='tight')
if DEBUG: Image(pname +'.png')
%ls -ltr /tmp/panel_*
# Assemble the four exported panels into figure_CNN, then rasterize.
%%tikz -f pdf --save {fname}.pdf
\draw[white, fill=white] (0.\linewidth,0) rectangle (1.\linewidth, .382\linewidth) ;
\draw [anchor=north west] (.0\linewidth, .375\linewidth) node {\includegraphics[width=.95\linewidth]{/tmp/panel_A_None}};
\draw [anchor=north west] (.0\linewidth, .300\linewidth) node {\includegraphics[width=.95\linewidth]{/tmp/panel_A_HAP}};
\draw [anchor=north west] (.0\linewidth, .191\linewidth) node {\includegraphics[width=.45\linewidth]{/tmp/panel_B_None}};
\draw [anchor=north west] (.5\linewidth, .191\linewidth) node {\includegraphics[width=.45\linewidth]{/tmp/panel_B_HAP}};
\begin{scope}[font=\bf\sffamily\large]
%\draw [anchor=west,fill=white] (.0\linewidth, .382\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.0\linewidth, .191\linewidth) node [above right=-3mm] {$\mathsf{A}$};
\draw [anchor=west,fill=white] (.53\linewidth, .191\linewidth) node [above right=-3mm] {$\mathsf{B}$};
\end{scope}
!convert -density {dpi_export} {fname}.pdf {fname}.jpg
!convert -density {dpi_export} {fname}.pdf {fname}.png
#!convert -density {dpi_export} -resize 5400 -units pixelsperinch -flatten -compress lzw -depth 8 {fname}.pdf {fname}.tiff
Image(fname +'.png')
The learning itself is done via a gradient descent but is highly dependent on the coding / decoding algorithm. This belongs to another function (in the shl_encode.py script)
# Baseline ("vanilla") learning run with the default options.
shl = SHL(**opts)
list_figures = ['show_dico', 'show_Pcum', 'time_plot_F']
dico = shl.learn_dico(data=data, list_figures=list_figures, matname=tag + '_vanilla')
print('size of dictionary = (number of filters, size of imagelets) = ', dico.dictionary.shape)
print('average of filters = ', dico.dictionary.mean(axis=1).mean(),
'+/-', dico.dictionary.mean(axis=1).std())
# L2 energy of each learned filter.
SE = np.sqrt(np.sum(dico.dictionary**2, axis=1))
print('average energy of filters = ', SE.mean(), '+/-', SE.std())
help(shl)
help(dico)
Loading patches, with or without mask:
White Noise Initialization + Learning
# Compare dictionary initializations: white noise vs 1/f ('one_over_F') noise.
shl = SHL(one_over_F=False, **opts)
dico_w = shl.learn_dico(data=data, matname=tag + '_WHITE', list_figures=[])
shl = SHL(one_over_F=True, **opts)
dico_1oF = shl.learn_dico(data=data, matname=tag + '_OVF', list_figures=[])
fig_error, ax_error = None, None
fig_error, ax_error = shl.time_plot(dico_w, variable='F', fig=fig_error, ax=ax_error, color='blue', label='white noise')
fig_error, ax_error = shl.time_plot(dico_1oF, variable='F', fig=fig_error, ax=ax_error, color='red', label='one over f')
#ax_error.set_ylim((0, .65))
ax_error.legend(loc='best')
By default, we use the ADAM strategy; see https://arxiv.org/pdf/1412.6980.pdf
# Compare a fixed learning rate (beta1=0 disables ADAM's momentum term)
# against the default ADAM optimizer.
shl = SHL(beta1=0., **opts)
dico_fixed = shl.learn_dico(data=data, matname=tag + '_fixed', list_figures=[])
shl = SHL(**opts)
dico_default = shl.learn_dico(data=data, matname=tag + '_default', list_figures=[])
fig_error, ax_error = None, None
fig_error, ax_error = shl.time_plot(dico_fixed, variable='F', fig=fig_error, ax=ax_error, color='blue', label='fixed')
fig_error, ax_error = shl.time_plot(dico_default, variable='F', fig=fig_error, ax=ax_error, color='red', label='ADAM')
#ax_error.set_ylim((0, .65))
ax_error.legend(loc='best')
As suggested by AnonReviewer3, we have tested how the convergence was modified by changing the number of neurons. By comparing different numbers of neurons we could re-draw the same figures for the convergence of the algorithm as in our original figures. In addition, we have also checked that this result will hold on a range of sparsity levels. In particular, we found that in general, increasing the l0_sparseness parameter, the convergence took progressively longer. Importantly, we could see that in both cases, this did not depend on the kind of homeostasis heuristic chosen, proving the generality of our results.
This is shown in the supplementary material that we have added to our revision ("Testing different number of neurons and sparsity"). This useful extension proves the originality of our work as highlighted in point 4, and the generality of these results compared to the parameters of the network.
#from shl_scripts.shl_experiments import SHL_set
#homeo_methods = ['None', 'OLS', 'HEH']
# Scan network size and sparsity level for all five homeostasis methods.
homeo_methods = ['None', 'EMP', 'HAP', 'HEH', 'OLS']
variables = ['l0_sparseness', 'n_dictionary']
list_figures = []
#n_dictionary=21**2
for homeo_method in homeo_methods:
opts_ = opts.copy()
opts_.update(homeo_method=homeo_method, datapath=datapath)
experiments = SHL_set(opts_, tag=tag + '_' + homeo_method)
experiments.run(variables=variables, n_jobs=1, verbose=0)
# Plot final 'F' against each scanned variable, one sub-panel per variable.
fig, axs = plt.subplots(len(variables), 1, figsize=(fig_width/2, fig_width/(1+phi)), gridspec_kw=subplotpars, sharey=True)
for i_ax, variable in enumerate(variables):
for color, homeo_method in zip(colors, homeo_methods):
opts_ = opts.copy()
opts_.update(homeo_method=homeo_method, datapath=datapath)
experiments = SHL_set(opts_, tag=tag + '_' + homeo_method)
fig, axs[i_ax] = experiments.scan(variable=variable, list_figures=[], display='final', fig=fig, ax=axs[i_ax], color=color, display_variable='F', verbose=0) #, label=homeo_metho
axs[i_ax].set_xlabel('') #variable
axs[i_ax].text(.1, .8, variable, transform=axs[i_ax].transAxes)
#axs[i_ax].get_xaxis().set_major_formatter(matplotlib.ticker.ScalarFormatter())
# Reload the face database and re-run the CHAMP preprocessing pipeline,
# this time displaying intermediate results.
from CHAMP.DataLoader import LoadData
from CHAMP.DataTools import LocalContrastNormalization, FilterInputData, GenerateMask
from CHAMP.Monitor import DisplayDico, DisplayConvergenceCHAMP, DisplayWhere
import os
home = os.getenv('HOME')
datapath = os.path.join("/tmp", "database")
# NOTE(review): path differs from the earlier cell ('Face_DataBase/Raw_DataBase'
# vs 'Raw_DataBase') — TODO confirm which layout /tmp/database actually has.
path = os.path.join(datapath, "Raw_DataBase")
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
to_display = TrSet[0][0, 0:10, :, :, :]
print('Size=', TrSet[0].shape)
DisplayDico(to_display)
# MP Parameters
nb_dico = 20
width = 9
dico_size = (width, width)
l0 = 20
seed = 42
# Learning Parameters
eta = .05
nb_epoch = 500
TrSet, TeSet = LoadData('Face', path, decorrelate=False, resize=(65, 65))
N_TrSet, _, _, _ = LocalContrastNormalization(TrSet)
Filtered_L_TrSet = FilterInputData(
N_TrSet, sigma=0.25, style='Custom', start_R=15)
to_display = Filtered_L_TrSet[0][0, 0:10, :, :, :]
DisplayDico(to_display)
mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
DisplayDico(mask)
from CHAMP.CHAMP_Layer import CHAMP_Layer
from CHAMP.DataTools import SaveNetwork, LoadNetwork
# Low-level CHAMP layer without homeostasis: load from cache or retrain.
# NOTE(review): this reuses (clobbers) the figure-name variable 'fname'.
fname = 'cache_dir_CNN/CHAMP_low_None.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
# Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate;
# any load failure (missing/corrupt cache) falls back to training.
except Exception:
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=2)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error', 'histo'])
DisplayWhere(L1_mask.where)
# Low-level CHAMP layer with HAP homeostasis: load from cache or retrain.
fname = 'cache_dir_CNN/CHAMP_low_HAP.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
# Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
except Exception:
    # Learning Parameters
    eta_homeo = 0.0025
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=1)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where)
# Reconstruct images from the sparse code and the learned dictionary.
from CHAMP.DataTools import Rebuilt
import torch
rebuilt_image = Rebuilt(torch.FloatTensor(L1_mask.code), L1_mask.dictionary)
DisplayDico(rebuilt_image[0:10, :, :, :]);
We train higher-level feature vectors by forcing the network to:
# High-level CHAMP layer (larger atoms, sparser code) without homeostasis:
# load from cache or retrain.
fname = 'cache_dir_CNN/CHAMP_high_None.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
# Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
except Exception:
    nb_dico = 60
    width = 19
    dico_size = (width, width)
    l0 = 5
    mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
    # Learning Parameters
    eta_homeo = 0.0
    eta = .05
    nb_epoch = 500
    # learn
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=0)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where);
# High-level CHAMP layer with HAP homeostasis: load from cache or retrain.
fname = 'cache_dir_CNN/CHAMP_high_HAP.pkl'
try:
    L1_mask = LoadNetwork(loading_path=fname)
# Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit propagate.
except Exception:
    nb_dico = 60
    width = 19
    dico_size = (width, width)
    l0 = 5
    mask = GenerateMask(full_size=(nb_dico, 1, width, width), sigma=0.8, style='Gaussian')
    # Learning Parameters
    eta_homeo = 0.0025
    eta = .05
    nb_epoch = 500
    # learn
    L1_mask = CHAMP_Layer(l0_sparseness=l0, nb_dico=nb_dico,
                          dico_size=dico_size, mask=mask, verbose=0)
    dico_mask = L1_mask.TrainLayer(
        Filtered_L_TrSet, eta=eta, eta_homeo=eta_homeo, nb_epoch=nb_epoch, seed=seed)
    SaveNetwork(Network=L1_mask, saving_path=fname)
DisplayDico(L1_mask.dictionary)
DisplayConvergenceCHAMP(L1_mask, to_display=['error'])
DisplayConvergenceCHAMP(L1_mask, to_display=['histo'])
DisplayWhere(L1_mask.where);
# Final housekeeping: inspect the cache, record the environment, export the
# notebook to HTML, and commit the results.
!ls -l {shl.cache_dir}/{tag}*
#!rm {shl.cache_dir}/{tag}*lock*
#!rm {shl.cache_dir}/{tag}*
#!ls -l {shl.cache_dir}/{tag}*
%run model.py {tag} 0
%load_ext watermark
%watermark -i -h -m -v -p numpy,matplotlib,shl_scripts
!jupyter nbconvert --to html_embed Annex.ipynb --output=index.html
#!jupyter-nbconvert --template report --to pdf Annex.ipynb
#!pandoc Annex.html -o Annex.pdf
#!/Applications/Chromium.app/Contents/MacOS/Chromium --headless --disable-gpu --print-to-pdf=Annex.pdf file:///tmp/Annex.html
#!zip Annex.zip Annex.html
!git status
!git pull
!git commit -am' {tag} : re-running notebooks'
!git push